From: Keir Fraser
Date: Wed, 28 Oct 2009 17:08:26 +0000 (+0000)
Subject: AMD IOMMU: Use global interrupt remapping table by default
X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~13161

AMD IOMMU: Use global interrupt remapping table by default

Using a global interrupt remapping table shared by all devices has better
compatibility with certain old BIOSes. A per-device interrupt remapping
table can still be enabled by using the new parameter
"amd-iommu-perdev-intremap".

Signed-off-by: Wei Wang
---
diff --git a/xen/drivers/passthrough/amd/iommu_acpi.c b/xen/drivers/passthrough/amd/iommu_acpi.c
index b3bbc332f3..7905e12f07 100644
--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -29,6 +29,7 @@ extern unsigned short ivrs_bdf_entries;
 extern struct ivrs_mappings *ivrs_mappings;
 extern unsigned short last_bdf;
 extern int ioapic_bdf[MAX_IO_APICS];
+extern void *shared_intremap_table;
 
 static void add_ivrs_mapping_entry(
     u16 bdf, u16 alias_id, u8 flags, struct amd_iommu *iommu)
@@ -66,10 +67,19 @@ static void add_ivrs_mapping_entry(
     ivrs_mappings[bdf].dte_ext_int_pass = ext_int_pass;
     ivrs_mappings[bdf].dte_init_pass = init_pass;
 
-    /* allocate per-device interrupt remapping table */
-    if ( ivrs_mappings[alias_id].intremap_table == NULL )
-        ivrs_mappings[alias_id].intremap_table =
-            amd_iommu_alloc_intremap_table();
+    if ( ivrs_mappings[alias_id].intremap_table == NULL )
+    {
+        /* allocate per-device interrupt remapping table */
+        if ( amd_iommu_perdev_intremap )
+            ivrs_mappings[alias_id].intremap_table =
+                amd_iommu_alloc_intremap_table();
+        else
+        {
+            if ( shared_intremap_table == NULL )
+                shared_intremap_table = amd_iommu_alloc_intremap_table();
+            ivrs_mappings[alias_id].intremap_table = shared_intremap_table;
+        }
+    }
     /* assgin iommu hardware */
     ivrs_mappings[bdf].iommu = iommu;
 }
diff --git a/xen/drivers/passthrough/amd/iommu_init.c b/xen/drivers/passthrough/amd/iommu_init.c
index f33e9f55ae..0b8d5643b9 100644
--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -706,7 +706,8 @@ static int __init init_ivrs_mapping(void)
         ivrs_mappings[bdf].dte_ext_int_pass = IOMMU_CONTROL_DISABLED;
         ivrs_mappings[bdf].dte_init_pass = IOMMU_CONTROL_DISABLED;
 
-        spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
+        if ( amd_iommu_perdev_intremap )
+            spin_lock_init(&ivrs_mappings[bdf].intremap_lock);
     }
     return 0;
 }
diff --git a/xen/drivers/passthrough/amd/iommu_intr.c b/xen/drivers/passthrough/amd/iommu_intr.c
index c5b35ece4c..7de4b3a4f5 100644
--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -26,6 +26,15 @@ int ioapic_bdf[MAX_IO_APICS];
 extern struct ivrs_mappings *ivrs_mappings;
 extern unsigned short ivrs_bdf_entries;
 
+void *shared_intremap_table;
+static DEFINE_SPINLOCK(shared_intremap_lock);
+
+static spinlock_t* get_intremap_lock(int req_id)
+{
+    return (amd_iommu_perdev_intremap ?
+            &ivrs_mappings[req_id].intremap_lock:
+            &shared_intremap_lock);
+}
 
 static int get_intremap_requestor_id(int bdf)
 {
@@ -101,9 +110,10 @@ static void update_intremap_entry_from_ioapic(
     u8 delivery_mode, dest, vector, dest_mode;
     struct IO_APIC_route_entry *rte = ioapic_rte;
     int req_id;
+    spinlock_t *lock;
 
     req_id = get_intremap_requestor_id(bdf);
-
+    lock = get_intremap_lock(req_id);
     /* only remap interrupt vector when lower 32 bits in ioapic ire changed */
     if ( likely(!rte_upper) )
     {
@@ -112,10 +122,10 @@ static void update_intremap_entry_from_ioapic(
         dest_mode = rte->dest_mode;
         dest = rte->dest.logical.logical_dest;
 
-        spin_lock_irqsave(&ivrs_mappings[req_id].intremap_lock, flags);
+        spin_lock_irqsave(lock, flags);
         entry = (u32*)get_intremap_entry(req_id, vector, delivery_mode);
         update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
-        spin_unlock_irqrestore(&ivrs_mappings[req_id].intremap_lock, flags);
+        spin_unlock_irqrestore(lock, flags);
 
         if ( iommu->enabled )
         {
@@ -136,6 +146,7 @@ int __init amd_iommu_setup_ioapic_remapping(void)
     u8 delivery_mode, dest, vector, dest_mode;
     u16 bdf, req_id;
     struct amd_iommu *iommu;
+    spinlock_t *lock;
 
     /* Read ioapic entries and update interrupt remapping table accordingly */
     for ( apic = 0; apic < nr_ioapics; apic++ )
@@ -159,15 +170,17 @@ int __init amd_iommu_setup_ioapic_remapping(void)
             }
 
             req_id = get_intremap_requestor_id(bdf);
+            lock = get_intremap_lock(req_id);
+
             delivery_mode = rte.delivery_mode;
             vector = rte.vector;
             dest_mode = rte.dest_mode;
             dest = rte.dest.logical.logical_dest;
 
-            spin_lock_irqsave(&ivrs_mappings[req_id].intremap_lock, flags);
+            spin_lock_irqsave(lock, flags);
             entry = (u32*)get_intremap_entry(req_id, vector, delivery_mode);
             update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
-            spin_unlock_irqrestore(&ivrs_mappings[req_id].intremap_lock, flags);
+            spin_unlock_irqrestore(lock, flags);
 
             if ( iommu->enabled )
             {
@@ -234,13 +247,14 @@ static void update_intremap_entry_from_msi_msg(
     unsigned long flags;
     u32* entry;
     u16 bdf, req_id, alias_id;
     u8 delivery_mode, dest, vector, dest_mode;
+    spinlock_t *lock;
 
     bdf = (pdev->bus << 8) | pdev->devfn;
     req_id = get_dma_requestor_id(bdf);
+    lock = get_intremap_lock(req_id);
 
-    spin_lock_irqsave(&ivrs_mappings[req_id].intremap_lock, flags);
+    spin_lock_irqsave(lock, flags);
     dest_mode = (msg->address_lo >> MSI_ADDR_DESTMODE_SHIFT) & 0x1;
     delivery_mode = (msg->data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x1;
     vector = (msg->data >> MSI_DATA_VECTOR_SHIFT) & MSI_DATA_VECTOR_MASK;
@@ -248,7 +262,7 @@ static void update_intremap_entry_from_msi_msg(
     entry = (u32*)get_intremap_entry(req_id, vector, delivery_mode);
     update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
-    spin_unlock_irqrestore(&ivrs_mappings[req_id].intremap_lock, flags);
+    spin_unlock_irqrestore(lock, flags);
 
     /*
      * In some special cases, a pci-e device(e.g SATA controller in IDE mode)
      * needs to use different requestor id for MSI msg upstreaming and rte
      * updating. Here we temporarily use one IOMMU table entry for both alias
     * devices.
      */
     alias_id = get_intremap_requestor_id(bdf);
+    lock = get_intremap_lock(alias_id);
 
     if ( ( bdf != alias_id ) &&
          ivrs_mappings[alias_id].intremap_table != NULL )
     {
-        spin_lock_irqsave(&ivrs_mappings[alias_id].intremap_lock, flags);
+        spin_lock_irqsave(lock, flags);
         entry = (u32*)get_intremap_entry(alias_id, vector, delivery_mode);
         update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
         invalidate_interrupt_table(iommu, alias_id);
-        spin_unlock_irqrestore(&ivrs_mappings[alias_id].intremap_lock, flags);
+        spin_unlock_irqrestore(lock, flags);
     }
 
     if ( iommu->enabled )
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 2be485bb50..1e59a16e19 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -45,6 +45,7 @@ int iommu_snoop = 0;
 int iommu_qinval = 0;
 int iommu_intremap = 0;
 int amd_iommu_debug = 0;
+int amd_iommu_perdev_intremap = 0;
 
 static void __init parse_iommu_param(char *s)
 {
@@ -54,6 +55,7 @@ static void __init parse_iommu_param(char *s)
     iommu_qinval = 1;
     iommu_intremap = 1;
     amd_iommu_debug = 0;
+    amd_iommu_perdev_intremap = 0;
 
     do {
         ss = strchr(s, ',');
@@ -79,6 +81,8 @@ static void __init parse_iommu_param(char *s)
             iommu_intremap = 0;
         else if ( !strcmp(s, "amd-iommu-debug") )
             amd_iommu_debug = 1;
+        else if ( !strcmp(s, "amd-iommu-perdev-intremap") )
+            amd_iommu_perdev_intremap = 1;
 
         s = ss + 1;
     } while ( ss );
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index b16e2e9b21..26d859659e 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -33,6 +33,7 @@
 #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
 
 extern int amd_iommu_debug;
+extern int amd_iommu_perdev_intremap;
 
 #define AMD_IOMMU_DEBUG(fmt, args...) \
     do \
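
Usage sketch (not part of the patch itself): parse_iommu_param() above splits the
value of Xen's existing "iommu=" boot option on commas, so the new flag is passed
as an extra sub-option of that parameter. Assuming a GRUB entry that already boots
xen.gz (the kernel path and any other options shown are illustrative only), the old
per-device remapping tables could be re-enabled with something like:

    # grub.conf entry (illustrative): enable AMD IOMMU debug output and
    # per-device interrupt remapping tables instead of the new shared table
    kernel /boot/xen.gz iommu=amd-iommu-debug,amd-iommu-perdev-intremap

When the sub-option is absent, amd_iommu_perdev_intremap stays 0, so every entry in
ivrs_mappings[] points at the single shared_intremap_table and all updates are
serialised by the one shared_intremap_lock; passing the sub-option restores the
previous one-table-per-device behaviour guarded by each entry's intremap_lock.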